#if 0
unsigned short nmi_high = 0, nmi_low = 0;
#endif
- full_execution_context_t ctxt;
+ vcpu_guest_context_t ctxt;
extern void startup_32_smp(void);
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
memset(&ctxt, 0, sizeof(ctxt));
- ctxt.cpu_ctxt.ds = __USER_DS;
- ctxt.cpu_ctxt.es = __USER_DS;
- ctxt.cpu_ctxt.fs = 0;
- ctxt.cpu_ctxt.gs = 0;
- ctxt.cpu_ctxt.ss = __KERNEL_DS;
- ctxt.cpu_ctxt.cs = __KERNEL_CS;
- ctxt.cpu_ctxt.eip = start_eip;
- ctxt.cpu_ctxt.esp = idle->thread.esp;
- ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
+ ctxt.user_regs.ds = __USER_DS;
+ ctxt.user_regs.es = __USER_DS;
+ ctxt.user_regs.fs = 0;
+ ctxt.user_regs.gs = 0;
+ ctxt.user_regs.ss = __KERNEL_DS;
+ ctxt.user_regs.cs = __KERNEL_CS;
+ ctxt.user_regs.eip = start_eip;
+ ctxt.user_regs.esp = idle->thread.esp;
+ ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
/* FPU is set up to default initial state. */
memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
static inline int
HYPERVISOR_boot_vcpu(
- unsigned long vcpu, full_execution_context_t *ctxt)
+ unsigned long vcpu, vcpu_guest_context_t *ctxt)
{
int ret;
unsigned long ign1, ign2;
static inline int
HYPERVISOR_boot_vcpu(
- unsigned long vcpu, full_execution_context_t *ctxt)
+ unsigned long vcpu, vcpu_guest_context_t *ctxt)
{
int ret;
u32 domid,
u32 vcpu,
xc_domaininfo_t *info,
- full_execution_context_t *ctxt);
+ vcpu_guest_context_t *ctxt);
int xc_domain_setcpuweight(int xc_handle,
u32 domid,
float weight);
u32 domid,
u32 vcpu,
xc_domaininfo_t *info,
- full_execution_context_t *ctxt)
+ vcpu_guest_context_t *ctxt)
{
int rc, errno_saved;
dom0_op_t op;
gzFile initrd_gfd, unsigned long initrd_len,
unsigned long nr_pages,
unsigned long *pvsi, unsigned long *pvke,
- full_execution_context_t *ctxt,
+ vcpu_guest_context_t *ctxt,
const char *cmdline,
unsigned long shared_info_frame,
unsigned int control_evtchn,
int initrd_fd = -1;
gzFile initrd_gfd = NULL;
int rc, i;
- full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+ vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
unsigned long nr_pages;
char *image = NULL;
unsigned long image_size, initrd_size=0;
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
* EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
*/
- ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
- ctxt->cpu_ctxt.eip = vkern_entry;
- ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE;
- ctxt->cpu_ctxt.esi = vstartinfo_start;
- ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2);
+ ctxt->user_regs.ds = FLAT_KERNEL_DS;
+ ctxt->user_regs.es = FLAT_KERNEL_DS;
+ ctxt->user_regs.fs = FLAT_KERNEL_DS;
+ ctxt->user_regs.gs = FLAT_KERNEL_DS;
+ ctxt->user_regs.ss = FLAT_KERNEL_DS;
+ ctxt->user_regs.cs = FLAT_KERNEL_CS;
+ ctxt->user_regs.eip = vkern_entry;
+ ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
+ ctxt->user_regs.esi = vstartinfo_start;
+ ctxt->user_regs.eflags = (1<<9) | (1<<2);
/* FPU is set up to default initial state. */
memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
shared_info_t *shared_info = (shared_info_t *)shared_info_page;
/* A copy of the CPU context of the guest. */
- full_execution_context_t ctxt;
+ vcpu_guest_context_t ctxt;
/* First 16 bytes of the state file must contain 'LinuxGuestRecord'. */
char signature[16];
}
/* Uncanonicalise the suspend-record frame number and poke resume rec. */
- pfn = ctxt.cpu_ctxt.esi;
+ pfn = ctxt.user_regs.esi;
if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
{
xcio_error(ioctxt, "Suspend record frame number is bad");
goto out;
}
- ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
+ ctxt.user_regs.esi = mfn = pfn_to_mfn_table[pfn];
p_srec = xc_map_foreign_range(
xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
p_srec->resume_info.nr_pages = nr_pfns;
/*
* Safety checking of saved context:
- * 1. cpu_ctxt is fine, as Xen checks that on context switch.
+ * 1. user_regs is fine, as Xen checks that on context switch.
* 2. fpu_ctxt is fine, as it can't hurt Xen.
* 3. trap_ctxt needs the code selectors checked.
* 4. fast_trap_idx is checked by Xen.
int suspend_and_state(int xc_handle, XcIOContext *ioctxt,
xc_domaininfo_t *info,
- full_execution_context_t *ctxt)
+ vcpu_guest_context_t *ctxt)
{
int i=0;
unsigned long shared_info_frame;
/* A copy of the CPU context of the guest. */
- full_execution_context_t ctxt;
+ vcpu_guest_context_t ctxt;
/* A table containg the type of each PFN (/not/ MFN!). */
unsigned long *pfn_type = NULL;
"SUSPEND flags %08u shinfo %08lx eip %08u "
"esi %08u\n",info.flags,
info.shared_info_frame,
- ctxt.cpu_ctxt.eip, ctxt.cpu_ctxt.esi );
+ ctxt.user_regs.eip, ctxt.user_regs.esi );
}
if ( xc_shadow_control( xc_handle, domid,
domid for this to succeed. */
p_srec = xc_map_foreign_range(xc_handle, domid,
sizeof(*p_srec), PROT_READ,
- ctxt.cpu_ctxt.esi);
+ ctxt.user_regs.esi);
if (!p_srec){
xcio_error(ioctxt, "Couldn't map suspend record");
goto out;
}
/* Canonicalise the suspend-record frame number. */
- if ( !translate_mfn_to_pfn(&ctxt.cpu_ctxt.esi) ){
+ if ( !translate_mfn_to_pfn(&ctxt.user_regs.esi) ){
xcio_error(ioctxt, "Suspend record is not in range of pseudophys map");
goto out;
}
unsigned long tot_pages,
unsigned long *virt_startinfo_addr,
unsigned long *virt_load_addr,
- full_execution_context_t * ctxt,
+ vcpu_guest_context_t * ctxt,
const char *cmdline,
unsigned long shared_info_frame,
unsigned int control_evtchn,
int kernel_fd = -1;
gzFile kernel_gfd = NULL;
int rc, i;
- full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+ vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
unsigned long virt_startinfo_addr;
if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) {
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
* EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
*/
- ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
- ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
- ctxt->cpu_ctxt.eip = load_addr;
- ctxt->cpu_ctxt.eip = 0x80100020;
+ ctxt->user_regs.ds = FLAT_KERNEL_DS;
+ ctxt->user_regs.es = FLAT_KERNEL_DS;
+ ctxt->user_regs.fs = FLAT_KERNEL_DS;
+ ctxt->user_regs.gs = FLAT_KERNEL_DS;
+ ctxt->user_regs.ss = FLAT_KERNEL_DS;
+ ctxt->user_regs.cs = FLAT_KERNEL_CS;
+ ctxt->user_regs.eip = load_addr;
+ ctxt->user_regs.eip = 0x80100020;
/* put stack at top of second page */
- ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
+ ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
/* why is this set? */
- ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp;
- ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2);
+ ctxt->user_regs.esi = ctxt->user_regs.esp;
+ ctxt->user_regs.eflags = (1 << 9) | (1 << 2);
/* FPU is set up to default initial state. */
memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
/* Ring 1 stack is the initial stack. */
/* put stack at top of second page */
ctxt->kernel_ss = FLAT_KERNEL_DS;
- ctxt->kernel_esp = ctxt->cpu_ctxt.esp;
+ ctxt->kernel_esp = ctxt->user_regs.esp;
/* No debugging. */
memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
unsigned long *page_array = NULL;
static int regs_valid[MAX_VIRT_CPUS];
static unsigned long cr3[MAX_VIRT_CPUS];
-static full_execution_context_t ctxt[MAX_VIRT_CPUS];
+static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
/* --------------------- */
{
dom0_op_t op;
int retval;
- full_execution_context_t ctxt;
+ vcpu_guest_context_t ctxt;
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 10*1000*1000;
FETCH_REGS(cpu);
if (request == PTRACE_GETREGS) {
- SET_PT_REGS(pt, ctxt[cpu].cpu_ctxt);
+ SET_PT_REGS(pt, ctxt[cpu].user_regs);
memcpy(data, &pt, sizeof(elf_gregset_t));
} else if (request == PTRACE_GETFPREGS)
memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
break;
case PTRACE_SETREGS:
op.cmd = DOM0_SETDOMAININFO;
- SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].cpu_ctxt);
+ SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].user_regs);
op.u.setdomaininfo.domain = domid;
/* XXX need to understand multiple exec_domains */
op.u.setdomaininfo.exec_domain = cpu;
retval = do_dom0_op(xc_handle, &op);
break;
case PTRACE_SINGLESTEP:
- ctxt[VCPU].cpu_ctxt.eflags |= PSL_T;
+ ctxt[VCPU].user_regs.eflags |= PSL_T;
op.cmd = DOM0_SETDOMAININFO;
op.u.setdomaininfo.domain = domid;
op.u.setdomaininfo.exec_domain = 0;
if (request != PTRACE_SINGLESTEP) {
FETCH_REGS(cpu);
/* Clear trace flag */
- if (ctxt[cpu].cpu_ctxt.eflags & PSL_T) {
- ctxt[cpu].cpu_ctxt.eflags &= ~PSL_T;
+ if (ctxt[cpu].user_regs.eflags & PSL_T) {
+ ctxt[cpu].user_regs.eflags &= ~PSL_T;
op.cmd = DOM0_SETDOMAININFO;
op.u.setdomaininfo.domain = domid;
op.u.setdomaininfo.exec_domain = cpu;
char *image, unsigned long image_size,
gzFile initrd_gfd, unsigned long initrd_len,
unsigned long nr_pages,
- full_execution_context_t *ctxt,
+ vcpu_guest_context_t *ctxt,
const char *cmdline,
unsigned long shared_info_frame,
unsigned int control_evtchn,
/*
* Initial register values:
*/
- ctxt->cpu_ctxt.ds = 0x68;
- ctxt->cpu_ctxt.es = 0x0;
- ctxt->cpu_ctxt.fs = 0x0;
- ctxt->cpu_ctxt.gs = 0x0;
- ctxt->cpu_ctxt.ss = 0x68;
- ctxt->cpu_ctxt.cs = 0x60;
- ctxt->cpu_ctxt.eip = dsi.v_kernentry;
- ctxt->cpu_ctxt.edx = vboot_gdt_start;
- ctxt->cpu_ctxt.eax = 0x800;
- ctxt->cpu_ctxt.esp = vboot_gdt_end;
- ctxt->cpu_ctxt.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */
- ctxt->cpu_ctxt.ecx = mem_mapp->nr_map;
- ctxt->cpu_ctxt.esi = vboot_params_start;
- ctxt->cpu_ctxt.edi = vboot_params_start + 0x2d0;
-
- ctxt->cpu_ctxt.eflags = (1<<2);
+ ctxt->user_regs.ds = 0x68;
+ ctxt->user_regs.es = 0x0;
+ ctxt->user_regs.fs = 0x0;
+ ctxt->user_regs.gs = 0x0;
+ ctxt->user_regs.ss = 0x68;
+ ctxt->user_regs.cs = 0x60;
+ ctxt->user_regs.eip = dsi.v_kernentry;
+ ctxt->user_regs.edx = vboot_gdt_start;
+ ctxt->user_regs.eax = 0x800;
+ ctxt->user_regs.esp = vboot_gdt_end;
+ ctxt->user_regs.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */
+ ctxt->user_regs.ecx = mem_mapp->nr_map;
+ ctxt->user_regs.esi = vboot_params_start;
+ ctxt->user_regs.edi = vboot_params_start + 0x2d0;
+
+ ctxt->user_regs.eflags = (1<<2);
return 0;
int initrd_fd = -1;
gzFile initrd_gfd = NULL;
int rc, i;
- full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+ vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
unsigned long nr_pages;
char *image = NULL;
unsigned long image_size, initrd_size=0;
#include "xc.h"
#ifdef __i386__
-void
-print_ctx(full_execution_context_t *ctx1)
+void print_ctx(vcpu_guest_context_t *ctx1)
{
- execution_context_t *ctx = &ctx1->cpu_ctxt;
+ struct cpu_user_regs *regs = &ctx1->user_regs;
- printf("eip: %08lx\t", ctx->eip);
- printf("esp: %08lx\n", ctx->esp);
+ printf("eip: %08lx\t", regs->eip);
+ printf("esp: %08lx\n", regs->esp);
- printf("eax: %08lx\t", ctx->eax);
- printf("ebx: %08lx\t", ctx->ebx);
- printf("ecx: %08lx\t", ctx->ecx);
- printf("edx: %08lx\n", ctx->edx);
+ printf("eax: %08lx\t", regs->eax);
+ printf("ebx: %08lx\t", regs->ebx);
+ printf("ecx: %08lx\t", regs->ecx);
+ printf("edx: %08lx\n", regs->edx);
- printf("esi: %08lx\t", ctx->esi);
- printf("edi: %08lx\t", ctx->edi);
- printf("ebp: %08lx\n", ctx->ebp);
+ printf("esi: %08lx\t", regs->esi);
+ printf("edi: %08lx\t", regs->edi);
+ printf("ebp: %08lx\n", regs->ebp);
- printf(" cs: %08lx\t", ctx->cs);
- printf(" ds: %08lx\t", ctx->ds);
- printf(" fs: %08lx\t", ctx->fs);
- printf(" gs: %08lx\n", ctx->gs);
+ printf(" cs: %08lx\t", regs->cs);
+ printf(" ds: %08lx\t", regs->ds);
+ printf(" fs: %08lx\t", regs->fs);
+ printf(" gs: %08lx\n", regs->gs);
}
#endif
{
int ret;
xc_domaininfo_t info;
- full_execution_context_t ctx;
+ vcpu_guest_context_t ctx;
int xc_handle = xc_interface_open(); /* for accessing control interface */
return ret;
}
-void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
+void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
{
int i;
return;
}
-int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c)
+int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
{
dummy();
return 1;
}
-int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
+int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
{
dummy();
return 1;
return;
}
-struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
+struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); }
void raise_actimer_softirq(void)
{
return 1;
}
-void smp_apic_timer_interrupt(struct xen_regs * regs)
+void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
ack_APIC_irq();
perfc_incrc(apic_timer);
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
-asmlinkage void smp_spurious_interrupt(struct xen_regs *regs)
+asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs)
{
unsigned long v;
* This interrupt should never happen with our APIC/SMP architecture
*/
-asmlinkage void smp_error_interrupt(struct xen_regs *regs)
+asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs)
{
unsigned long v, v1;
}
static int
-handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx)
+handle_register_read_command(struct cpu_user_regs *regs, struct xendbg_context *ctx)
{
char buf[121];
}
static int
-process_command(char *received_packet, struct xen_regs *regs,
+process_command(char *received_packet, struct cpu_user_regs *regs,
struct xendbg_context *ctx)
{
char *ptr;
};
int
-__trap_to_cdb(struct xen_regs *regs)
+__trap_to_cdb(struct cpu_user_regs *regs)
{
int resume = 0;
int r;
}
void arch_getdomaininfo_ctxt(
- struct exec_domain *ed, full_execution_context_t *c)
+ struct exec_domain *ed, struct vcpu_guest_context *c)
{
int i;
#ifdef __i386__ /* Remove when x86_64 VMX is implemented */
#ifdef CONFIG_VMX
- extern void save_vmx_execution_context(execution_context_t *);
+ extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
#endif
#endif
c->flags = 0;
- memcpy(&c->cpu_ctxt,
- &ed->arch.user_ctxt,
- sizeof(ed->arch.user_ctxt));
+ memcpy(&c->user_regs,
+ &ed->arch.user_regs,
+ sizeof(ed->arch.user_regs));
/* IOPL privileges are virtualised -- merge back into returned eflags. */
- BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0);
- c->cpu_ctxt.eflags |= ed->arch.iopl << 12;
+ BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
+ c->user_regs.eflags |= ed->arch.iopl << 12;
#ifdef __i386__
#ifdef CONFIG_VMX
if ( VMX_DOMAIN(ed) )
- save_vmx_execution_context(&c->cpu_ctxt);
+ save_vmx_cpu_user_regs(&c->user_regs);
#endif
#endif
if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
c->flags |= ECF_I387_VALID;
- if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
+ if ( KERNEL_MODE(ed, &ed->arch.user_regs) )
c->flags |= ECF_IN_KERNEL;
#ifdef CONFIG_VMX
if (VMX_DOMAIN(ed))
reset_stack_and_jump(vmx_asm_do_launch);
}
-static int vmx_final_setup_guest(struct exec_domain *ed,
- full_execution_context_t *full_context)
+static int vmx_final_setup_guest(
+ struct exec_domain *ed, struct vcpu_guest_context *ctxt)
{
int error;
- execution_context_t *context;
+ struct cpu_user_regs *regs;
struct vmcs_struct *vmcs;
- context = &full_context->cpu_ctxt;
+ regs = &ctxt->user_regs;
/*
* Create a new VMCS
ed->arch.arch_vmx.vmcs = vmcs;
error = construct_vmcs(
- &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
+ &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
if ( error < 0 )
{
printk("Failed to construct a new VMCS\n");
#if defined (__i386)
ed->arch.arch_vmx.vmx_platform.real_mode_data =
- (unsigned long *) context->esi;
+ (unsigned long *) regs->esi;
#endif
if (ed == ed->domain->exec_domain[0]) {
/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
- struct exec_domain *ed, full_execution_context_t *c)
+ struct exec_domain *ed, struct vcpu_guest_context *c)
{
struct domain *d = ed->domain;
unsigned long phys_basetab;
* If SS RPL or DPL differs from CS RPL then we'll #GP.
*/
if (!(c->flags & ECF_VMX_GUEST))
- if ( ((c->cpu_ctxt.cs & 3) == 0) ||
- ((c->cpu_ctxt.ss & 3) == 0) )
+ if ( ((c->user_regs.cs & 3) == 0) ||
+ ((c->user_regs.ss & 3) == 0) )
return -EINVAL;
clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
if ( c->flags & ECF_IN_KERNEL )
ed->arch.flags |= TF_kernel_mode;
- memcpy(&ed->arch.user_ctxt,
- &c->cpu_ctxt,
- sizeof(ed->arch.user_ctxt));
+ memcpy(&ed->arch.user_regs,
+ &c->user_regs,
+ sizeof(ed->arch.user_regs));
memcpy(&ed->arch.i387,
&c->fpu_ctxt,
sizeof(ed->arch.i387));
/* IOPL privileges are virtualised. */
- ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3;
- ed->arch.user_ctxt.eflags &= ~EF_IOPL;
+ ed->arch.iopl = (ed->arch.user_regs.eflags >> 12) & 3;
+ ed->arch.user_regs.eflags &= ~EF_IOPL;
/* Clear IOPL for unprivileged domains. */
if (!IS_PRIV(d))
- ed->arch.user_ctxt.eflags &= 0xffffcfff;
+ ed->arch.user_regs.eflags &= 0xffffcfff;
if (test_bit(EDF_DONEINIT, &ed->ed_flags))
return 0;
unsigned long start_stack,
unsigned long start_info)
{
- execution_context_t *ec = &d->arch.user_ctxt;
+ struct cpu_user_regs *regs = &d->arch.user_regs;
/*
* Initial register values:
* ESI = start_info
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
*/
- ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS;
- ec->ss = FLAT_KERNEL_SS;
- ec->cs = FLAT_KERNEL_CS;
- ec->eip = start_pc;
- ec->esp = start_stack;
- ec->esi = start_info;
-
- __save_flags(ec->eflags);
- ec->eflags |= X86_EFLAGS_IF;
+ regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
+ regs->ss = FLAT_KERNEL_SS;
+ regs->cs = FLAT_KERNEL_CS;
+ regs->eip = start_pc;
+ regs->esp = start_stack;
+ regs->esi = start_info;
+
+ __save_flags(regs->eflags);
+ regs->eflags |= X86_EFLAGS_IF;
}
int all_segs_okay = 1;
/* Either selector != 0 ==> reload. */
- if ( unlikely(p->arch.user_ctxt.ds |
- n->arch.user_ctxt.ds) )
- all_segs_okay &= loadsegment(ds, n->arch.user_ctxt.ds);
+ if ( unlikely(p->arch.user_regs.ds |
+ n->arch.user_regs.ds) )
+ all_segs_okay &= loadsegment(ds, n->arch.user_regs.ds);
/* Either selector != 0 ==> reload. */
- if ( unlikely(p->arch.user_ctxt.es |
- n->arch.user_ctxt.es) )
- all_segs_okay &= loadsegment(es, n->arch.user_ctxt.es);
+ if ( unlikely(p->arch.user_regs.es |
+ n->arch.user_regs.es) )
+ all_segs_okay &= loadsegment(es, n->arch.user_regs.es);
/*
* Either selector != 0 ==> reload.
* Also reload to reset FS_BASE if it was non-zero.
*/
- if ( unlikely(p->arch.user_ctxt.fs |
- p->arch.user_ctxt.fs_base |
- n->arch.user_ctxt.fs) )
+ if ( unlikely(p->arch.user_regs.fs |
+ p->arch.user_regs.fs_base |
+ n->arch.user_regs.fs) )
{
- all_segs_okay &= loadsegment(fs, n->arch.user_ctxt.fs);
- if ( p->arch.user_ctxt.fs ) /* != 0 selector kills fs_base */
- p->arch.user_ctxt.fs_base = 0;
+ all_segs_okay &= loadsegment(fs, n->arch.user_regs.fs);
+ if ( p->arch.user_regs.fs ) /* != 0 selector kills fs_base */
+ p->arch.user_regs.fs_base = 0;
}
/*
* Either selector != 0 ==> reload.
* Also reload to reset GS_BASE if it was non-zero.
*/
- if ( unlikely(p->arch.user_ctxt.gs |
- p->arch.user_ctxt.gs_base_user |
- n->arch.user_ctxt.gs) )
+ if ( unlikely(p->arch.user_regs.gs |
+ p->arch.user_regs.gs_base_user |
+ n->arch.user_regs.gs) )
{
/* Reset GS_BASE with user %gs? */
- if ( p->arch.user_ctxt.gs || !n->arch.user_ctxt.gs_base_user )
- all_segs_okay &= loadsegment(gs, n->arch.user_ctxt.gs);
- if ( p->arch.user_ctxt.gs ) /* != 0 selector kills gs_base_user */
- p->arch.user_ctxt.gs_base_user = 0;
+ if ( p->arch.user_regs.gs || !n->arch.user_regs.gs_base_user )
+ all_segs_okay &= loadsegment(gs, n->arch.user_regs.gs);
+ if ( p->arch.user_regs.gs ) /* != 0 selector kills gs_base_user */
+ p->arch.user_regs.gs_base_user = 0;
}
/* This can only be non-zero if selector is NULL. */
- if ( n->arch.user_ctxt.fs_base )
+ if ( n->arch.user_regs.fs_base )
wrmsr(MSR_FS_BASE,
- n->arch.user_ctxt.fs_base,
- n->arch.user_ctxt.fs_base>>32);
+ n->arch.user_regs.fs_base,
+ n->arch.user_regs.fs_base>>32);
/* Most kernels have non-zero GS base, so don't bother testing. */
/* (This is also a serialising instruction, avoiding AMD erratum #88.) */
wrmsr(MSR_SHADOW_GS_BASE,
- n->arch.user_ctxt.gs_base_kernel,
- n->arch.user_ctxt.gs_base_kernel>>32);
+ n->arch.user_regs.gs_base_kernel,
+ n->arch.user_regs.gs_base_kernel>>32);
/* This can only be non-zero if selector is NULL. */
- if ( n->arch.user_ctxt.gs_base_user )
+ if ( n->arch.user_regs.gs_base_user )
wrmsr(MSR_GS_BASE,
- n->arch.user_ctxt.gs_base_user,
- n->arch.user_ctxt.gs_base_user>>32);
+ n->arch.user_regs.gs_base_user,
+ n->arch.user_regs.gs_base_user>>32);
/* If in kernel mode then switch the GS bases around. */
if ( n->arch.flags & TF_kernel_mode )
if ( unlikely(!all_segs_okay) )
{
- struct xen_regs *regs = get_execution_context();
+ struct cpu_user_regs *regs = get_cpu_user_regs();
unsigned long *rsp =
(n->arch.flags & TF_kernel_mode) ?
(unsigned long *)regs->rsp :
put_user(regs->rflags, rsp- 3) |
put_user(regs->cs, rsp- 4) |
put_user(regs->rip, rsp- 5) |
- put_user(n->arch.user_ctxt.gs, rsp- 6) |
- put_user(n->arch.user_ctxt.fs, rsp- 7) |
- put_user(n->arch.user_ctxt.es, rsp- 8) |
- put_user(n->arch.user_ctxt.ds, rsp- 9) |
+ put_user(n->arch.user_regs.gs, rsp- 6) |
+ put_user(n->arch.user_regs.fs, rsp- 7) |
+ put_user(n->arch.user_regs.es, rsp- 8) |
+ put_user(n->arch.user_regs.ds, rsp- 9) |
put_user(regs->r11, rsp-10) |
put_user(regs->rcx, rsp-11) )
{
static void save_segments(struct exec_domain *p)
{
- __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
- __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
- __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
- __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
+ __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_regs.ds) );
+ __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_regs.es) );
+ __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_regs.fs) );
+ __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_regs.gs) );
}
static void clear_segments(void)
long do_switch_to_user(void)
{
- struct xen_regs *regs = get_execution_context();
+ struct cpu_user_regs *regs = get_cpu_user_regs();
struct switch_to_user stu;
struct exec_domain *ed = current;
static void __context_switch(void)
{
- execution_context_t *stack_ec = get_execution_context();
+ struct cpu_user_regs *stack_ec = get_cpu_user_regs();
unsigned int cpu = smp_processor_id();
struct exec_domain *p = percpu_ctxt[cpu].curr_ed;
struct exec_domain *n = current;
if ( !is_idle_task(p->domain) )
{
- memcpy(&p->arch.user_ctxt,
+ memcpy(&p->arch.user_regs,
stack_ec,
CTXT_SWITCH_STACK_BYTES);
unlazy_fpu(p);
if ( !is_idle_task(n->domain) )
{
memcpy(stack_ec,
- &n->arch.user_ctxt,
+ &n->arch.user_regs,
CTXT_SWITCH_STACK_BYTES);
/* Maybe switch the debug registers. */
unsigned int op, unsigned int nr_args, ...)
{
struct mc_state *mcs = &mc_state[smp_processor_id()];
- execution_context_t *ec;
+ struct cpu_user_regs *regs;
unsigned int i;
va_list args;
}
else
{
- ec = get_execution_context();
+ regs = get_cpu_user_regs();
#if defined(__i386__)
- ec->eax = op;
- ec->eip -= 2; /* re-execute 'int 0x82' */
+ regs->eax = op;
+ regs->eip -= 2; /* re-execute 'int 0x82' */
for ( i = 0; i < nr_args; i++ )
{
switch ( i )
{
- case 0: ec->ebx = va_arg(args, unsigned long); break;
- case 1: ec->ecx = va_arg(args, unsigned long); break;
- case 2: ec->edx = va_arg(args, unsigned long); break;
- case 3: ec->esi = va_arg(args, unsigned long); break;
- case 4: ec->edi = va_arg(args, unsigned long); break;
- case 5: ec->ebp = va_arg(args, unsigned long); break;
+ case 0: regs->ebx = va_arg(args, unsigned long); break;
+ case 1: regs->ecx = va_arg(args, unsigned long); break;
+ case 2: regs->edx = va_arg(args, unsigned long); break;
+ case 3: regs->esi = va_arg(args, unsigned long); break;
+ case 4: regs->edi = va_arg(args, unsigned long); break;
+ case 5: regs->ebp = va_arg(args, unsigned long); break;
}
}
#elif defined(__x86_64__)
- ec->rax = op;
- ec->rip -= 2; /* re-execute 'syscall' */
+ regs->rax = op;
+ regs->rip -= 2; /* re-execute 'syscall' */
for ( i = 0; i < nr_args; i++ )
{
switch ( i )
{
- case 0: ec->rdi = va_arg(args, unsigned long); break;
- case 1: ec->rsi = va_arg(args, unsigned long); break;
- case 2: ec->rdx = va_arg(args, unsigned long); break;
- case 3: ec->r10 = va_arg(args, unsigned long); break;
- case 4: ec->r8 = va_arg(args, unsigned long); break;
- case 5: ec->r9 = va_arg(args, unsigned long); break;
+ case 0: regs->rdi = va_arg(args, unsigned long); break;
+ case 1: regs->rsi = va_arg(args, unsigned long); break;
+ case 2: regs->rdx = va_arg(args, unsigned long); break;
+ case 3: regs->r10 = va_arg(args, unsigned long); break;
+ case 4: regs->r8 = va_arg(args, unsigned long); break;
+ case 5: regs->r9 = va_arg(args, unsigned long); break;
}
}
#endif
}
unsigned long
-search_pre_exception_table(struct xen_regs *regs)
+search_pre_exception_table(struct cpu_user_regs *regs)
{
unsigned long addr = (unsigned long)regs->eip;
unsigned long fixup = search_one_table(
static void __do_IRQ_guest(int irq);
-void no_action(int cpl, void *dev_id, struct xen_regs *regs) { }
+void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
spin_unlock_irqrestore(&desc->lock, flags);
}
-asmlinkage void do_IRQ(struct xen_regs *regs)
+asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
unsigned int irq = regs->entry_vector;
irq_desc_t *desc = &irq_desc[irq];
return EXCRET_fault_fixed;
emulate:
- if ( x86_emulate_memop(get_execution_context(), addr,
+ if ( x86_emulate_memop(get_cpu_user_regs(), addr,
&ptwr_mem_emulator, BITS_PER_LONG/8) )
return 0;
perfc_incrc(ptwr_emulations);
alert_counter[i] = 0;
}
-void nmi_watchdog_tick (struct xen_regs * regs)
+void nmi_watchdog_tick (struct cpu_user_regs * regs)
{
int sum, cpu = smp_processor_id();
free_out_of_sync_state(d);
}
-int shadow_fault(unsigned long va, struct xen_regs *regs)
+int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
{
l1_pgentry_t gpte, spte, orig_gpte;
struct exec_domain *ed = current;
static unsigned long wc_sec, wc_usec; /* UTC time at last 'time update'. */
static rwlock_t time_lock = RW_LOCK_UNLOCKED;
-void timer_interrupt(int irq, void *dev_id, struct xen_regs *regs)
+void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
write_lock_irq(&time_lock);
* are disabled). In such situations we can't do much that is safe. We try to
* print out some tracing and then we just spin.
*/
-asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
+asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
{
int cpu = smp_processor_id();
unsigned long cr2;
}
static inline int do_trap(int trapnr, char *str,
- struct xen_regs *regs,
+ struct cpu_user_regs *regs,
int use_error_code)
{
struct exec_domain *ed = current;
}
#define DO_ERROR_NOCODE(trapnr, str, name) \
-asmlinkage int do_##name(struct xen_regs *regs) \
+asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
return do_trap(trapnr, str, regs, 0); \
}
#define DO_ERROR(trapnr, str, name) \
-asmlinkage int do_##name(struct xen_regs *regs) \
+asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
return do_trap(trapnr, str, regs, 1); \
}
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
-asmlinkage int do_int3(struct xen_regs *regs)
+asmlinkage int do_int3(struct cpu_user_regs *regs)
{
struct exec_domain *ed = current;
struct trap_bounce *tb = &ed->arch.trap_bounce;
return 0;
}
-asmlinkage void do_machine_check(struct xen_regs *regs)
+asmlinkage void do_machine_check(struct cpu_user_regs *regs)
{
fatal_trap(TRAP_machine_check, regs);
}
ed->arch.guest_cr2 = addr;
}
-asmlinkage int do_page_fault(struct xen_regs *regs)
+asmlinkage int do_page_fault(struct cpu_user_regs *regs)
{
unsigned long off, addr, fixup;
struct exec_domain *ed = current;
/* Has the guest requested sufficient permission for this I/O access? */
static inline int guest_io_okay(
unsigned int port, unsigned int bytes,
- struct exec_domain *ed, struct xen_regs *regs)
+ struct exec_domain *ed, struct cpu_user_regs *regs)
{
u16 x;
#if defined(__x86_64__)
/* Has the administrator granted sufficient permission for this I/O access? */
static inline int admin_io_okay(
unsigned int port, unsigned int bytes,
- struct exec_domain *ed, struct xen_regs *regs)
+ struct exec_domain *ed, struct cpu_user_regs *regs)
{
struct domain *d = ed->domain;
u16 x;
goto read_fault; \
eip += _size; (_type)_x; })
-static int emulate_privileged_op(struct xen_regs *regs)
+static int emulate_privileged_op(struct cpu_user_regs *regs)
{
struct exec_domain *ed = current;
unsigned long *reg, eip = regs->eip;
return EXCRET_fault_fixed;
}
-asmlinkage int do_general_protection(struct xen_regs *regs)
+asmlinkage int do_general_protection(struct cpu_user_regs *regs)
{
struct exec_domain *ed = current;
struct trap_bounce *tb = &ed->arch.trap_bounce;
send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
}
-asmlinkage void mem_parity_error(struct xen_regs *regs)
+asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
{
/* Clear and disable the parity-error line. */
outb((inb(0x61)&15)|4,0x61);
}
}
-asmlinkage void io_check_error(struct xen_regs *regs)
+asmlinkage void io_check_error(struct cpu_user_regs *regs)
{
/* Clear and disable the I/O-error line. */
outb((inb(0x61)&15)|8,0x61);
printk("Do you have a strange power saving mode enabled?\n");
}
-asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason)
+asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason)
{
++nmi_count(smp_processor_id());
unknown_nmi_error((unsigned char)(reason&0xff));
}
-asmlinkage int math_state_restore(struct xen_regs *regs)
+asmlinkage int math_state_restore(struct cpu_user_regs *regs)
{
/* Prevent recursion. */
clts();
return EXCRET_fault_fixed;
}
-asmlinkage int do_debug(struct xen_regs *regs)
+asmlinkage int do_debug(struct cpu_user_regs *regs)
{
unsigned long condition;
struct exec_domain *ed = current;
return EXCRET_not_a_fault;
}
-asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
+asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs)
{
return EXCRET_not_a_fault;
}
extern long evtchn_send(int lport);
extern long do_block(void);
-void do_nmi(struct xen_regs *, unsigned long);
+void do_nmi(struct cpu_user_regs *, unsigned long);
int start_vmx()
{
#include <asm/domain_page.h>
-static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs)
+static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
{
struct exec_domain *ed = current;
unsigned long eip;
return result;
}
-static void vmx_do_general_protection_fault(struct xen_regs *regs)
+static void vmx_do_general_protection_fault(struct cpu_user_regs *regs)
{
unsigned long eip, error_code;
unsigned long intr_fields;
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
}
-static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs)
+static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs)
{
unsigned int eax, ebx, ecx, edx;
unsigned long eip;
#define CASE_GET_REG_P(REG, reg) \
case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
-static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
+static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
{
unsigned int reg;
unsigned long *reg_p = 0;
shadow_invlpg(ed, va);
}
-static void vmx_io_instruction(struct xen_regs *regs,
+static void vmx_io_instruction(struct cpu_user_regs *regs,
unsigned long exit_qualification, unsigned long inst_len)
{
struct exec_domain *d = current;
/*
* Write to control registers
*/
-static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
+static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
unsigned long value;
unsigned long old_cr;
/*
* Read from control registers. CR0 and CR4 are read from the shadow.
*/
-static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
+static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
unsigned long value;
struct exec_domain *d = current;
VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
}
-static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
+static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
{
unsigned int gp, cr;
unsigned long value;
return 1;
}
-static inline void vmx_do_msr_read(struct xen_regs *regs)
+static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
{
VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
(unsigned long)regs->ecx, (unsigned long)regs->eax,
print_buf[index++] = c;
}
-void save_vmx_execution_context(execution_context_t *ctxt)
+void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
{
__vmread(GUEST_SS_SELECTOR, &ctxt->ss);
__vmread(GUEST_ESP, &ctxt->esp);
}
#ifdef XEN_DEBUGGER
-void save_xen_regs(struct xen_regs *regs)
+void save_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->xss);
__vmread(GUEST_ESP, &regs->esp);
__vmread(GUEST_DS_SELECTOR, &regs->xds);
}
-void restore_xen_regs(struct xen_regs *regs)
+void restore_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmwrite(GUEST_SS_SELECTOR, regs->xss);
__vmwrite(GUEST_ESP, regs->esp);
}
#endif
-asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
+asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
{
unsigned int exit_reason, idtv_info_field;
unsigned long exit_qualification, eip, inst_len = 0;
#ifdef XEN_DEBUGGER
case TRAP_debug:
{
- save_xen_regs(&regs);
+ save_cpu_user_regs(&regs);
pdb_handle_exception(1, &regs, 1);
- restore_xen_regs(&regs);
+ restore_cpu_user_regs(&regs);
break;
}
case TRAP_int3:
{
- save_xen_regs(&regs);
+ save_cpu_user_regs(&regs);
pdb_handle_exception(3, &regs, 1);
- restore_xen_regs(&regs);
+ restore_cpu_user_regs(&regs);
break;
}
#endif
case EXIT_REASON_EXTERNAL_INTERRUPT:
{
extern int vector_irq[];
- extern asmlinkage void do_IRQ(struct xen_regs *);
- extern void smp_apic_timer_interrupt(struct xen_regs *);
- extern void timer_interrupt(int, void *, struct xen_regs *);
+ extern asmlinkage void do_IRQ(struct cpu_user_regs *);
+ extern void smp_apic_timer_interrupt(struct cpu_user_regs *);
+ extern void timer_interrupt(int, void *, struct cpu_user_regs *);
unsigned int vector;
if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
/* vmx_io_assist light-weight version, specific to PIT DM */
static void resume_pit_io(ioreq_t *p)
{
- execution_context_t *ec = get_execution_context();
- unsigned long old_eax = ec->eax;
+ struct cpu_user_regs *regs = get_cpu_user_regs();
+ unsigned long old_eax = regs->eax;
p->state = STATE_INVALID;
switch(p->size) {
case 1:
- ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
+ regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
break;
case 2:
- ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
+ regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
break;
case 4:
- ec->eax = (p->u.data & 0xffffffff);
+ regs->eax = (p->u.data & 0xffffffff);
break;
default:
BUG();
extern long do_block();
#if defined (__i386__)
-static void load_xen_regs(struct xen_regs *regs)
+static void load_cpu_user_regs(struct cpu_user_regs *regs)
{
/*
* Write the guest register value into VMCS
__vmwrite(GUEST_EIP, regs->eip);
}
-static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
+static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
switch (size) {
case BYTE:
}
}
#else
-static void load_xen_regs(struct xen_regs *regs)
+static void load_cpu_user_regs(struct cpu_user_regs *regs)
{
/* XXX: TBD */
return;
}
-static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
+static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
{
/* XXX: TBD */
return;
vcpu_iodata_t *vio;
ioreq_t *p;
struct domain *d = ed->domain;
- execution_context_t *ec = get_execution_context();
+ struct cpu_user_regs *regs = get_cpu_user_regs();
unsigned long old_eax;
int sign;
struct mi_per_cpu_info *mpci_p;
- struct xen_regs *inst_decoder_regs;
+ struct cpu_user_regs *inst_decoder_regs;
mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
inst_decoder_regs = mpci_p->inst_decoder_regs;
sign = (p->df) ? -1 : 1;
if (p->port_mm) {
if (p->pdata_valid) {
- ec->esi += sign * p->count * p->size;
- ec->edi += sign * p->count * p->size;
+ regs->esi += sign * p->count * p->size;
+ regs->edi += sign * p->count * p->size;
} else {
if (p->dir == IOREQ_WRITE) {
return;
if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
p->u.data = p->u.data & 0xffff;
}
- set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
+ set_reg_value(size, index, 0, regs, p->u.data);
}
- load_xen_regs((struct xen_regs *)ec);
+ load_cpu_user_regs(regs);
return;
}
if (p->dir == IOREQ_WRITE) {
if (p->pdata_valid) {
- ec->esi += sign * p->count * p->size;
- ec->ecx -= p->count;
+ regs->esi += sign * p->count * p->size;
+ regs->ecx -= p->count;
}
return;
} else {
if (p->pdata_valid) {
- ec->edi += sign * p->count * p->size;
- ec->ecx -= p->count;
+ regs->edi += sign * p->count * p->size;
+ regs->ecx -= p->count;
return;
}
}
- old_eax = ec->eax;
+ old_eax = regs->eax;
switch(p->size) {
case 1:
- ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
+ regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
break;
case 2:
- ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
+ regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
break;
case 4:
- ec->eax = (p->u.data & 0xffffffff);
+ regs->eax = (p->u.data & 0xffffffff);
break;
default:
BUG();
#define DECODE_failure 0
#if defined (__x86_64__)
-static void store_xen_regs(struct xen_regs *regs)
+static void store_cpu_user_regs(struct cpu_user_regs *regs)
{
}
-static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
+static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
return 0;
}
#elif defined (__i386__)
-static void store_xen_regs(struct xen_regs *regs)
+static void store_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->ss);
__vmread(GUEST_ESP, &regs->esp);
__vmread(GUEST_EIP, &regs->eip);
}
-static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
+static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
/*
* Reference the db_reg[] table
ioreq_t *p;
int vm86;
struct mi_per_cpu_info *mpci_p;
- struct xen_regs *inst_decoder_regs;
+ struct cpu_user_regs *inst_decoder_regs;
extern long evtchn_send(int lport);
extern long do_block(void);
unsigned long eip, eflags, cs;
unsigned long inst_len, inst_addr;
struct mi_per_cpu_info *mpci_p;
- struct xen_regs *inst_decoder_regs;
+ struct cpu_user_regs *inst_decoder_regs;
struct instruction mmio_inst;
unsigned char inst[MAX_INST_LEN];
int vm86, ret;
domain_crash_synchronous();
__vmwrite(GUEST_EIP, eip + inst_len);
- store_xen_regs(inst_decoder_regs);
+ store_cpu_user_regs(inst_decoder_regs);
// Only handle "mov" and "movs" instructions!
if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
#define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
-int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
+int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
{
int i;
unsigned int n;
struct e820entry *e820p;
unsigned long gpfn = 0;
- context->ebx = 0; /* Linux expects ebx to be 0 for boot proc */
+ regs->ebx = 0; /* Linux expects ebx to be 0 for boot proc */
- n = context->ecx;
+ n = regs->ecx;
if (n > 32) {
VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
return -1;
}
- addr = context->edi;
+ addr = regs->edi;
offset = (addr & ~PAGE_MASK);
addr = round_pgdown(addr);
mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
struct Xgt_desc_struct desc;
unsigned long pfn = 0;
struct pfn_info *page;
- execution_context_t *ec = get_execution_context();
+ struct cpu_user_regs *regs = get_cpu_user_regs();
cpu = smp_processor_id();
page = (struct pfn_info *) alloc_domheap_page(NULL);
pfn = (unsigned long) (page - frame_table);
- vmx_setup_platform(ed, ec);
+ vmx_setup_platform(ed, regs);
__asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
host_env.gdtr_limit = desc.size;
* Initially set the same environement as host.
*/
static inline int
-construct_init_vmcs_guest(execution_context_t *context,
- full_execution_context_t *full_context,
+construct_init_vmcs_guest(struct cpu_user_regs *regs,
+ struct vcpu_guest_context *ctxt,
struct host_execution_env *host_env)
{
int error = 0;
error |= __vmwrite(CR3_TARGET_COUNT, 0);
/* Guest Selectors */
- error |= __vmwrite(GUEST_CS_SELECTOR, context->cs);
- error |= __vmwrite(GUEST_ES_SELECTOR, context->es);
- error |= __vmwrite(GUEST_SS_SELECTOR, context->ss);
- error |= __vmwrite(GUEST_DS_SELECTOR, context->ds);
- error |= __vmwrite(GUEST_FS_SELECTOR, context->fs);
- error |= __vmwrite(GUEST_GS_SELECTOR, context->gs);
+ error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
+ error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
+ error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
+ error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
+ error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
+ error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);
/* Guest segment Limits */
error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
arbytes.fields.seg_type = 0xb; /* type = 0xb */
error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_GDTR_BASE, context->edx);
- context->edx = 0;
- error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax);
- context->eax = 0;
+ error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
+ regs->edx = 0;
+ error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
+ regs->eax = 0;
arbytes.fields.s = 0; /* not code or data segement */
arbytes.fields.seg_type = 0x2; /* LTD */
error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
- error |= __vmwrite(GUEST_ESP, context->esp);
- error |= __vmwrite(GUEST_EIP, context->eip);
+ error |= __vmwrite(GUEST_ESP, regs->esp);
+ error |= __vmwrite(GUEST_EIP, regs->eip);
- eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
+ eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
error |= __vmwrite(GUEST_EFLAGS, eflags);
*/
int construct_vmcs(struct arch_vmx_struct *arch_vmx,
- execution_context_t *context,
- full_execution_context_t *full_context,
+ struct cpu_user_regs *regs,
+ struct vcpu_guest_context *ctxt,
int use_host_env)
{
int error;
return -EINVAL;
}
/* guest selectors */
- if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) {
+ if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
printk("construct_vmcs: construct_vmcs_guest failed\n");
return -EINVAL;
}
void __dummy__(void)
{
- OFFSET(XREGS_eax, struct xen_regs, eax);
- OFFSET(XREGS_ebx, struct xen_regs, ebx);
- OFFSET(XREGS_ecx, struct xen_regs, ecx);
- OFFSET(XREGS_edx, struct xen_regs, edx);
- OFFSET(XREGS_esi, struct xen_regs, esi);
- OFFSET(XREGS_edi, struct xen_regs, edi);
- OFFSET(XREGS_esp, struct xen_regs, esp);
- OFFSET(XREGS_ebp, struct xen_regs, ebp);
- OFFSET(XREGS_eip, struct xen_regs, eip);
- OFFSET(XREGS_cs, struct xen_regs, cs);
- OFFSET(XREGS_ds, struct xen_regs, ds);
- OFFSET(XREGS_es, struct xen_regs, es);
- OFFSET(XREGS_fs, struct xen_regs, fs);
- OFFSET(XREGS_gs, struct xen_regs, gs);
- OFFSET(XREGS_ss, struct xen_regs, ss);
- OFFSET(XREGS_eflags, struct xen_regs, eflags);
- OFFSET(XREGS_error_code, struct xen_regs, error_code);
- OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
- OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp);
- DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
+ OFFSET(UREGS_eax, struct cpu_user_regs, eax);
+ OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
+ OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
+ OFFSET(UREGS_edx, struct cpu_user_regs, edx);
+ OFFSET(UREGS_esi, struct cpu_user_regs, esi);
+ OFFSET(UREGS_edi, struct cpu_user_regs, edi);
+ OFFSET(UREGS_esp, struct cpu_user_regs, esp);
+ OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
+ OFFSET(UREGS_eip, struct cpu_user_regs, eip);
+ OFFSET(UREGS_cs, struct cpu_user_regs, cs);
+ OFFSET(UREGS_ds, struct cpu_user_regs, ds);
+ OFFSET(UREGS_es, struct cpu_user_regs, es);
+ OFFSET(UREGS_fs, struct cpu_user_regs, fs);
+ OFFSET(UREGS_gs, struct cpu_user_regs, gs);
+ OFFSET(UREGS_ss, struct cpu_user_regs, ss);
+ OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+ OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
+ OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
+ DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
OFFSET(EDOMAIN_processor, struct exec_domain, processor);
#include <asm/asm-offsets.h>
- // int call_with_registers(void (*f)(struct xen_regs *r)) ->
- // build a xen_regs structure, and then call f with that.
+ // int call_with_registers(void (*f)(struct cpu_user_regs *r)) ->
+ // build a cpu_user_regs structure, and then call f with that.
call_with_registers:
pushf
- subl $XREGS_user_sizeof, %esp
- movl %ebx, XREGS_ebx(%esp)
- movl %ecx, XREGS_ecx(%esp)
- movl %edx, XREGS_edx(%esp)
- movl %esi, XREGS_esi(%esp)
- movl %edi, XREGS_edi(%esp)
- movl %ebp, XREGS_ebp(%esp)
- movl %eax, XREGS_eax(%esp)
- movw $0, XREGS_error_code(%esp)
- movw $0, XREGS_entry_vector(%esp)
- movl XREGS_user_sizeof+4(%esp), %eax
- movl %eax, XREGS_eip(%esp)
- movl %cs, XREGS_cs(%esp)
- movl XREGS_user_sizeof(%esp), %eax
- movl %eax, XREGS_eflags(%esp)
- movl %esp, XREGS_esp(%esp)
- addl $XREGS_user_sizeof+4, XREGS_esp(%esp)
- movl %ss, XREGS_ss(%esp)
- movl %es, XREGS_es(%esp)
- movl %ds, XREGS_ds(%esp)
- movl %fs, XREGS_fs(%esp)
- movl %gs, XREGS_gs(%esp)
+ subl $UREGS_user_sizeof, %esp
+ movl %ebx, UREGS_ebx(%esp)
+ movl %ecx, UREGS_ecx(%esp)
+ movl %edx, UREGS_edx(%esp)
+ movl %esi, UREGS_esi(%esp)
+ movl %edi, UREGS_edi(%esp)
+ movl %ebp, UREGS_ebp(%esp)
+ movl %eax, UREGS_eax(%esp)
+ movw $0, UREGS_error_code(%esp)
+ movw $0, UREGS_entry_vector(%esp)
+ movl UREGS_user_sizeof+4(%esp), %eax
+ movl %eax, UREGS_eip(%esp)
+ movl %cs, UREGS_cs(%esp)
+ movl UREGS_user_sizeof(%esp), %eax
+ movl %eax, UREGS_eflags(%esp)
+ movl %esp, UREGS_esp(%esp)
+ addl $UREGS_user_sizeof+4, UREGS_esp(%esp)
+ movl %ss, UREGS_ss(%esp)
+ movl %es, UREGS_es(%esp)
+ movl %ds, UREGS_ds(%esp)
+ movl %fs, UREGS_fs(%esp)
+ movl %gs, UREGS_gs(%esp)
- movl XREGS_user_sizeof+8(%esp), %eax
+ movl UREGS_user_sizeof+8(%esp), %eax
pushl %esp
call *%eax
- add $XREGS_user_sizeof + 8, %esp
+ add $UREGS_user_sizeof + 8, %esp
ret
* and we set it to the fixed value.
*
* We also need the room, especially because orig_eax field is used
- * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
+ * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
* (10) u32 gs;
* (9) u32 fs;
* (8) u32 ds;
pushl $VMX_MONITOR_EFLAGS; \
popf; \
subl $(NR_SKIPPED_REGS*4), %esp; \
- movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \
+ movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
pushl %eax; \
pushl %ebp; \
pushl %edi; \
ALIGN
restore_all_guest:
- testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
+ testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
jnz restore_all_vm86
-FLT1: movl XREGS_ds(%esp),%ds
-FLT2: movl XREGS_es(%esp),%es
-FLT3: movl XREGS_fs(%esp),%fs
-FLT4: movl XREGS_gs(%esp),%gs
+FLT1: movl UREGS_ds(%esp),%ds
+FLT2: movl UREGS_es(%esp),%es
+FLT3: movl UREGS_fs(%esp),%fs
+FLT4: movl UREGS_gs(%esp),%gs
restore_all_vm86:
popl %ebx
popl %ecx
.section .fixup,"ax"
FIX5: subl $28,%esp
pushl 28(%esp) # error_code/entry_vector
- movl %eax,XREGS_eax+4(%esp)
- movl %ebp,XREGS_ebp+4(%esp)
- movl %edi,XREGS_edi+4(%esp)
- movl %esi,XREGS_esi+4(%esp)
- movl %edx,XREGS_edx+4(%esp)
- movl %ecx,XREGS_ecx+4(%esp)
- movl %ebx,XREGS_ebx+4(%esp)
+ movl %eax,UREGS_eax+4(%esp)
+ movl %ebp,UREGS_ebp+4(%esp)
+ movl %edi,UREGS_edi+4(%esp)
+ movl %esi,UREGS_esi+4(%esp)
+ movl %edx,UREGS_edx+4(%esp)
+ movl %ecx,UREGS_ecx+4(%esp)
+ movl %ebx,UREGS_ebx+4(%esp)
FIX1: SET_XEN_SEGMENTS(a)
movl %eax,%fs
movl %eax,%gs
movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
xorl %eax,%eax
- movl %eax,XREGS_ds(%esp)
- movl %eax,XREGS_es(%esp)
- movl %eax,XREGS_fs(%esp)
- movl %eax,XREGS_gs(%esp)
+ movl %eax,UREGS_ds(%esp)
+ movl %eax,UREGS_es(%esp)
+ movl %eax,UREGS_fs(%esp)
+ movl %eax,UREGS_gs(%esp)
jmp test_all_events
.previous
.section __pre_ex_table,"a"
andl $(NR_hypercalls-1),%eax
PERFC_INCR(PERFC_hypercalls, %eax)
call *SYMBOL_NAME(hypercall_table)(,%eax,4)
- movl %eax,XREGS_eax(%esp) # save the return value
+ movl %eax,UREGS_eax(%esp) # save the return value
test_all_events:
xorl %ecx,%ecx
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {EIP, CS, EFLAGS, [ESP, SS]} */
/* %edx == trap_bounce, %ebx == struct exec_domain */
-/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
+/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
create_bounce_frame:
- movl XREGS_eflags+4(%esp),%ecx
- movb XREGS_cs+4(%esp),%cl
+ movl UREGS_eflags+4(%esp),%ecx
+ movb UREGS_cs+4(%esp),%cl
testl $(2|X86_EFLAGS_VM),%ecx
jz ring1 /* jump if returning to an existing ring-1 activation */
movl EDOMAIN_kernel_sp(%ebx),%esi
FLT6: movl EDOMAIN_kernel_ss(%ebx),%gs
- testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+ testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
jz nvm86_1
subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
- movl XREGS_es+4(%esp),%eax
+ movl UREGS_es+4(%esp),%eax
FLT7: movl %eax,%gs:(%esi)
- movl XREGS_ds+4(%esp),%eax
+ movl UREGS_ds+4(%esp),%eax
FLT8: movl %eax,%gs:4(%esi)
- movl XREGS_fs+4(%esp),%eax
+ movl UREGS_fs+4(%esp),%eax
FLT9: movl %eax,%gs:8(%esi)
- movl XREGS_gs+4(%esp),%eax
+ movl UREGS_gs+4(%esp),%eax
FLT10: movl %eax,%gs:12(%esi)
nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
- movl XREGS_esp+4(%esp),%eax
+ movl UREGS_esp+4(%esp),%eax
FLT11: movl %eax,%gs:(%esi)
- movl XREGS_ss+4(%esp),%eax
+ movl UREGS_ss+4(%esp),%eax
FLT12: movl %eax,%gs:4(%esi)
jmp 1f
ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
- movl XREGS_esp+4(%esp),%esi
-FLT13: movl XREGS_ss+4(%esp),%gs
+ movl UREGS_esp+4(%esp),%esi
+FLT13: movl UREGS_ss+4(%esp),%gs
1: /* Construct a stack frame: EFLAGS, CS/EIP */
subl $12,%esi
- movl XREGS_eip+4(%esp),%eax
+ movl UREGS_eip+4(%esp),%eax
FLT14: movl %eax,%gs:(%esi)
- movl XREGS_cs+4(%esp),%eax
+ movl UREGS_cs+4(%esp),%eax
FLT15: movl %eax,%gs:4(%esi)
- movl XREGS_eflags+4(%esp),%eax
+ movl UREGS_eflags+4(%esp),%eax
FLT16: movl %eax,%gs:8(%esi)
movb TRAPBOUNCE_flags(%edx),%cl
test $TBF_EXCEPTION_ERRCODE,%cl
1: testb $TBF_FAILSAFE,%cl
jz 2f
subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
- testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+ testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
jz nvm86_2
xorl %eax,%eax # VM86: we write zero selector values
FLT19: movl %eax,%gs:(%esi)
FLT21: movl %eax,%gs:8(%esi)
FLT22: movl %eax,%gs:12(%esi)
jmp 2f
-nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
+nvm86_2:movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values
FLT23: movl %eax,%gs:(%esi)
- movl XREGS_es+4(%esp),%eax
+ movl UREGS_es+4(%esp),%eax
FLT24: movl %eax,%gs:4(%esi)
- movl XREGS_fs+4(%esp),%eax
+ movl UREGS_fs+4(%esp),%eax
FLT25: movl %eax,%gs:8(%esi)
- movl XREGS_gs+4(%esp),%eax
+ movl UREGS_gs+4(%esp),%eax
FLT26: movl %eax,%gs:12(%esi)
-2: testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+2: testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
jz nvm86_3
xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
- movl %eax,XREGS_ds+4(%esp)
- movl %eax,XREGS_es+4(%esp)
- movl %eax,XREGS_fs+4(%esp)
- movl %eax,XREGS_gs+4(%esp)
+ movl %eax,UREGS_ds+4(%esp)
+ movl %eax,UREGS_es+4(%esp)
+ movl %eax,UREGS_fs+4(%esp)
+ movl %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
- andl $0xfffcbeff,XREGS_eflags+4(%esp)
- movl %gs,XREGS_ss+4(%esp)
- movl %esi,XREGS_esp+4(%esp)
+ andl $0xfffcbeff,UREGS_eflags+4(%esp)
+ movl %gs,UREGS_ss+4(%esp)
+ movl %esi,UREGS_esp+4(%esp)
movzwl TRAPBOUNCE_cs(%edx),%eax
- movl %eax,XREGS_cs+4(%esp)
+ movl %eax,UREGS_cs+4(%esp)
movl TRAPBOUNCE_eip(%edx),%eax
- movl %eax,XREGS_eip+4(%esp)
+ movl %eax,UREGS_eip+4(%esp)
movb $0,TRAPBOUNCE_flags(%edx)
ret
.section __ex_table,"a"
ALIGN
ENTRY(ret_from_intr)
GET_CURRENT(%ebx)
- movl XREGS_eflags(%esp),%eax
- movb XREGS_cs(%esp),%al
+ movl UREGS_eflags(%esp),%eax
+ movb UREGS_cs(%esp),%al
testl $(3|X86_EFLAGS_VM),%eax
jnz test_all_events
jmp restore_all_xen
error_code:
SAVE_ALL_NOSEGREGS(a)
SET_XEN_SEGMENTS(a)
- testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
+ testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
jz exception_with_ints_disabled
sti # re-enable interrupts
xorl %eax,%eax
- movw XREGS_entry_vector(%esp),%ax
+ movw UREGS_entry_vector(%esp),%ax
movl %esp,%edx
- pushl %edx # push the xen_regs pointer
+ pushl %edx # push the cpu_user_regs pointer
GET_CURRENT(%ebx)
PERFC_INCR(PERFC_exceptions, %eax)
call *SYMBOL_NAME(exception_table)(,%eax,4)
addl $4,%esp
- movl XREGS_eflags(%esp),%eax
- movb XREGS_cs(%esp),%al
+ movl UREGS_eflags(%esp),%eax
+ movb UREGS_cs(%esp),%al
testl $(3|X86_EFLAGS_VM),%eax
jz restore_all_xen
jmp process_guest_exception_and_events
exception_with_ints_disabled:
- movl XREGS_eflags(%esp),%eax
- movb XREGS_cs(%esp),%al
+ movl UREGS_eflags(%esp),%eax
+ movb UREGS_cs(%esp),%al
testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
jnz FATAL_exception_with_ints_disabled
pushl %esp
addl $4,%esp
testl %eax,%eax # no fixup code for faulting EIP?
jz FATAL_exception_with_ints_disabled
- movl %eax,XREGS_eip(%esp)
+ movl %eax,UREGS_eip(%esp)
movl %esp,%esi
subl $4,%esp
movl %esp,%edi
- movl $XREGS_kernel_sizeof/4,%ecx
+ movl $UREGS_kernel_sizeof/4,%ecx
rep; movsl # make room for error_code/entry_vector
- movl XREGS_error_code(%esp),%eax # error_code/entry_vector
- movl %eax,XREGS_kernel_sizeof(%esp)
+ movl UREGS_error_code(%esp),%eax # error_code/entry_vector
+ movl %eax,UREGS_kernel_sizeof(%esp)
jmp restore_all_xen # return to fixup code
FATAL_exception_with_ints_disabled:
xorl %esi,%esi
- movw XREGS_entry_vector(%esp),%si
+ movw UREGS_entry_vector(%esp),%si
movl %esp,%edx
- pushl %edx # push the xen_regs pointer
+ pushl %edx # push the cpu_user_regs pointer
pushl %esi # push the trapnr (entry vector)
call SYMBOL_NAME(fatal_trap)
ud2
# In all other cases we bail without touching DS-GS, as we have
# interrupted an enclosing Xen activation in tricky prologue or
# epilogue code.
- movl XREGS_eflags(%esp),%eax
- movb XREGS_cs(%esp),%al
+ movl UREGS_eflags(%esp),%eax
+ movb UREGS_cs(%esp),%al
testl $(3|X86_EFLAGS_VM),%eax
jnz do_watchdog_tick
movl %ds,%eax
push %edx
call SYMBOL_NAME(mem_parity_error)
addl $4,%esp
-nmi_out:movl %ss:XREGS_eflags(%esp),%eax
- movb %ss:XREGS_cs(%esp),%al
+nmi_out:movl %ss:UREGS_eflags(%esp),%eax
+ movb %ss:UREGS_cs(%esp),%al
testl $(3|X86_EFLAGS_VM),%eax
jz restore_all_xen
movl $(__HYPERVISOR_DS),%edx
addl $4,%esp
# GS:ESI == Ring-1 stack activation
- movl XREGS_esp(%esp),%esi
-VFLT1: movl XREGS_ss(%esp),%gs
+ movl UREGS_esp(%esp),%esi
+VFLT1: movl UREGS_ss(%esp),%gs
# ES:EDI == Ring-0 stack activation
- leal XREGS_eip(%esp),%edi
+ leal UREGS_eip(%esp),%edi
# Restore the hypercall-number-clobbered EAX on our stack frame
VFLT2: movl %gs:(%esi),%eax
- movl %eax,XREGS_eax(%esp)
+ movl %eax,UREGS_eax(%esp)
addl $4,%esi
# Copy the VM86 activation from the ring-1 stack to the ring-0 stack
- movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
+ movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
VFLT3: movl %gs:(%esi),%eax
stosl
addl $4,%esi
loop VFLT3
# Fix up EFLAGS: IOPL=0, IF=1, VM=1
- andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
- orl $X86_EFLAGS_IF|X86_EFLAGS_VM,XREGS_eflags(%esp)
+ andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
+ orl $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)
jmp test_all_events
* Called from the general-protection fault handler to attempt to decode
* and emulate an instruction that depends on 4GB segments.
*/
-int gpf_emulate_4gb(struct xen_regs *regs)
+int gpf_emulate_4gb(struct cpu_user_regs *regs)
{
struct exec_domain *d = current;
trap_info_t *ti;
void show_guest_stack(void)
{
int i;
- execution_context_t *ec = get_execution_context();
- unsigned long *stack = (unsigned long *)ec->esp;
- printk("Guest EIP is %08x\n ", ec->eip);
+ struct cpu_user_regs *regs = get_cpu_user_regs();
+ unsigned long *stack = (unsigned long *)regs->esp;
+
+ printk("Guest EIP is %08x\n ", regs->eip);
for ( i = 0; i < kstack_depth_to_print; i++ )
{
show_trace( esp );
}
-void show_registers(struct xen_regs *regs)
+void show_registers(struct cpu_user_regs *regs)
{
unsigned long ss, ds, es, fs, gs, cs;
unsigned long eip, esp, eflags;
}
BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
-asmlinkage void smp_deferred_nmi(struct xen_regs regs)
+asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
{
- asmlinkage void do_nmi(struct xen_regs *, unsigned long);
+ asmlinkage void do_nmi(struct cpu_user_regs *, unsigned long);
ack_APIC_irq();
do_nmi(&regs, 0);
}
void __dummy__(void)
{
- OFFSET(XREGS_r15, struct xen_regs, r15);
- OFFSET(XREGS_r14, struct xen_regs, r14);
- OFFSET(XREGS_r13, struct xen_regs, r13);
- OFFSET(XREGS_r12, struct xen_regs, r12);
- OFFSET(XREGS_rbp, struct xen_regs, rbp);
- OFFSET(XREGS_rbx, struct xen_regs, rbx);
- OFFSET(XREGS_r11, struct xen_regs, r11);
- OFFSET(XREGS_r10, struct xen_regs, r10);
- OFFSET(XREGS_r9, struct xen_regs, r9);
- OFFSET(XREGS_r8, struct xen_regs, r8);
- OFFSET(XREGS_rax, struct xen_regs, rax);
- OFFSET(XREGS_rcx, struct xen_regs, rcx);
- OFFSET(XREGS_rdx, struct xen_regs, rdx);
- OFFSET(XREGS_rsi, struct xen_regs, rsi);
- OFFSET(XREGS_rdi, struct xen_regs, rdi);
- OFFSET(XREGS_error_code, struct xen_regs, error_code);
- OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
- OFFSET(XREGS_rip, struct xen_regs, rip);
- OFFSET(XREGS_cs, struct xen_regs, cs);
- OFFSET(XREGS_eflags, struct xen_regs, eflags);
- OFFSET(XREGS_rsp, struct xen_regs, rsp);
- OFFSET(XREGS_ss, struct xen_regs, ss);
- OFFSET(XREGS_kernel_sizeof, struct xen_regs, es);
- DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
+ OFFSET(UREGS_r15, struct cpu_user_regs, r15);
+ OFFSET(UREGS_r14, struct cpu_user_regs, r14);
+ OFFSET(UREGS_r13, struct cpu_user_regs, r13);
+ OFFSET(UREGS_r12, struct cpu_user_regs, r12);
+ OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
+ OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
+ OFFSET(UREGS_r11, struct cpu_user_regs, r11);
+ OFFSET(UREGS_r10, struct cpu_user_regs, r10);
+ OFFSET(UREGS_r9, struct cpu_user_regs, r9);
+ OFFSET(UREGS_r8, struct cpu_user_regs, r8);
+ OFFSET(UREGS_rax, struct cpu_user_regs, rax);
+ OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
+ OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
+ OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
+ OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
+ OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
+ OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
+ OFFSET(UREGS_rip, struct cpu_user_regs, rip);
+ OFFSET(UREGS_cs, struct cpu_user_regs, cs);
+ OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+ OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
+ OFFSET(UREGS_ss, struct cpu_user_regs, ss);
+ OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
+ DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
OFFSET(EDOMAIN_processor, struct exec_domain, processor);
leaq SYMBOL_NAME(hypercall_table)(%rip),%r10
PERFC_INCR(PERFC_hypercalls, %rax)
callq *(%r10,%rax,8)
- movq %rax,XREGS_rax(%rsp) # save the return value
+ movq %rax,UREGS_rax(%rsp) # save the return value
/* %rbx: struct exec_domain */
test_all_events:
* and we set it to the fixed value.
*
* We also need the room, especially because orig_eax field is used
- * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
+ * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
* (13) u64 gs_base_user;
* (12) u64 gs_base_kernel;
* (11) u64 fs_base;
movq EDOMAIN_kernel_sp(%rbx),%rsi
jmp 2f
1: /* In kernel context already: push new frame at existing %rsp. */
- movq XREGS_rsp+8(%rsp),%rsi
- andb $0xfc,XREGS_cs+8(%rsp) # Indicate kernel context to guest.
+ movq UREGS_rsp+8(%rsp),%rsi
+ andb $0xfc,UREGS_cs+8(%rsp) # Indicate kernel context to guest.
2: movq $HYPERVISOR_VIRT_START,%rax
cmpq %rax,%rsi
jb 1f # In +ve address space? Then okay.
cmpq %rax,%rsi
jb domain_crash_synchronous # Above Xen private area? Then okay.
1: subq $40,%rsi
- movq XREGS_ss+8(%rsp),%rax
+ movq UREGS_ss+8(%rsp),%rax
FLT2: movq %rax,32(%rsi) # SS
- movq XREGS_rsp+8(%rsp),%rax
+ movq UREGS_rsp+8(%rsp),%rax
FLT3: movq %rax,24(%rsi) # RSP
- movq XREGS_eflags+8(%rsp),%rax
+ movq UREGS_eflags+8(%rsp),%rax
FLT4: movq %rax,16(%rsi) # RFLAGS
- movq XREGS_cs+8(%rsp),%rax
+ movq UREGS_cs+8(%rsp),%rax
FLT5: movq %rax,8(%rsi) # CS
- movq XREGS_rip+8(%rsp),%rax
+ movq UREGS_rip+8(%rsp),%rax
FLT6: movq %rax,(%rsi) # RIP
movb TRAPBOUNCE_flags(%rdx),%cl
testb $TBF_EXCEPTION_ERRCODE,%cl
movl %ds,%eax
FLT12: movq %rax,(%rsi) # DS
2: subq $16,%rsi
- movq XREGS_r11+8(%rsp),%rax
+ movq UREGS_r11+8(%rsp),%rax
FLT13: movq %rax,8(%rsi) # R11
- movq XREGS_rcx+8(%rsp),%rax
+ movq UREGS_rcx+8(%rsp),%rax
FLT14: movq %rax,(%rsi) # RCX
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
- movq $TRAP_syscall,XREGS_entry_vector+8(%rsp)
- andl $0xfffcbeff,XREGS_eflags+8(%rsp)
- movq $__GUEST_SS,XREGS_ss+8(%rsp)
- movq %rsi,XREGS_rsp+8(%rsp)
- movq $__GUEST_CS,XREGS_cs+8(%rsp)
+ movq $TRAP_syscall,UREGS_entry_vector+8(%rsp)
+ andl $0xfffcbeff,UREGS_eflags+8(%rsp)
+ movq $__GUEST_SS,UREGS_ss+8(%rsp)
+ movq %rsi,UREGS_rsp+8(%rsp)
+ movq $__GUEST_CS,UREGS_cs+8(%rsp)
movq TRAPBOUNCE_eip(%rdx),%rax
- movq %rax,XREGS_rip+8(%rsp)
+ movq %rax,UREGS_rip+8(%rsp)
movb $0,TRAPBOUNCE_flags(%rdx)
ret
.section __ex_table,"a"
/* No special register assumptions. */
ENTRY(ret_from_intr)
GET_CURRENT(%rbx)
- testb $3,XREGS_cs(%rsp)
+ testb $3,UREGS_cs(%rsp)
jnz test_all_events
jmp restore_all_xen
/* No special register assumptions. */
error_code:
SAVE_ALL
- testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp)
+ testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
jz exception_with_ints_disabled
sti
movq %rsp,%rdi
- movl XREGS_entry_vector(%rsp),%eax
+ movl UREGS_entry_vector(%rsp),%eax
leaq SYMBOL_NAME(exception_table)(%rip),%rdx
GET_CURRENT(%rbx)
PERFC_INCR(PERFC_exceptions, %rax)
callq *(%rdx,%rax,8)
- testb $3,XREGS_cs(%rsp)
+ testb $3,UREGS_cs(%rsp)
jz restore_all_xen
jmp process_guest_exception_and_events
/* No special register assumptions. */
exception_with_ints_disabled:
- testb $3,XREGS_cs(%rsp) # interrupts disabled outside Xen?
+ testb $3,UREGS_cs(%rsp) # interrupts disabled outside Xen?
jnz FATAL_exception_with_ints_disabled
movq %rsp,%rdi
call search_pre_exception_table
testq %rax,%rax # no fixup code for faulting EIP?
jz FATAL_exception_with_ints_disabled
- movq %rax,XREGS_rip(%rsp)
- subq $8,XREGS_rsp(%rsp) # add ec/ev to previous stack frame
- testb $15,XREGS_rsp(%rsp) # return %rsp is now aligned?
+ movq %rax,UREGS_rip(%rsp)
+ subq $8,UREGS_rsp(%rsp) # add ec/ev to previous stack frame
+ testb $15,UREGS_rsp(%rsp) # return %rsp is now aligned?
jz 1f # then there is a pad quadword already
movq %rsp,%rsi
subq $8,%rsp
movq %rsp,%rdi
- movq $XREGS_kernel_sizeof/8,%rcx
+ movq $UREGS_kernel_sizeof/8,%rcx
rep; movsq # make room for ec/ev
-1: movq XREGS_error_code(%rsp),%rax # ec/ev
- movq %rax,XREGS_kernel_sizeof(%rsp)
+1: movq UREGS_error_code(%rsp),%rax # ec/ev
+ movq %rax,UREGS_kernel_sizeof(%rsp)
jmp restore_all_xen # return to fixup code
/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
- movl XREGS_entry_vector(%rsp),%edi
+ movl UREGS_entry_vector(%rsp),%edi
movq %rsp,%rsi
call SYMBOL_NAME(fatal_trap)
ud2
switch ( which )
{
case SEGBASE_FS:
- ed->arch.user_ctxt.fs_base = base;
+ ed->arch.user_regs.fs_base = base;
if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
ret = -EFAULT;
break;
case SEGBASE_GS_USER:
- ed->arch.user_ctxt.gs_base_user = base;
+ ed->arch.user_regs.gs_base_user = base;
if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
ret = -EFAULT;
break;
case SEGBASE_GS_KERNEL:
- ed->arch.user_ctxt.gs_base_kernel = base;
+ ed->arch.user_regs.gs_base_kernel = base;
if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
ret = -EFAULT;
break;
void show_guest_stack(void)
{
int i;
- execution_context_t *ec = get_execution_context();
- unsigned long *stack = (unsigned long *)ec->rsp;
- printk("Guest RIP is %016lx\n ", ec->rip);
+ struct cpu_user_regs *regs = get_cpu_user_regs();
+ unsigned long *stack = (unsigned long *)regs->rsp;
+
+ printk("Guest RIP is %016lx\n ", regs->rip);
for ( i = 0; i < kstack_depth_to_print; i++ )
{
show_trace(rsp);
}
-void show_registers(struct xen_regs *regs)
+void show_registers(struct cpu_user_regs *regs)
{
printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n",
smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
}
asmlinkage void double_fault(void);
-asmlinkage void do_double_fault(struct xen_regs *regs)
+asmlinkage void do_double_fault(struct cpu_user_regs *regs)
{
/* Disable the NMI watchdog. It's useless now. */
watchdog_on = 0;
void *
decode_register(
- u8 modrm_reg, struct xen_regs *regs, int highbyte_regs)
+ u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
{
void *p;
int
x86_emulate_memop(
- struct xen_regs *regs,
+ struct cpu_user_regs *regs,
unsigned long cr2,
struct x86_mem_emulator *ops,
int mode)
struct operand src, dst;
/* Shadow copy of register state. Committed on successful emulation. */
- struct xen_regs _regs = *regs;
+ struct cpu_user_regs _regs = *regs;
/* Legacy prefixes. */
for ( i = 0; i < 8; i++ )
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
- struct exec_domain *, full_execution_context_t *);
+ struct exec_domain *, struct vcpu_guest_context *);
static inline int is_free_domid(domid_t dom)
{
case DOM0_GETDOMAININFO:
{
- full_execution_context_t *c;
+ struct vcpu_guest_context *c;
struct domain *d;
struct exec_domain *ed;
if ( op->u.getdomaininfo.ctxt != NULL )
{
- if ( (c = xmalloc(full_execution_context_t)) == NULL )
+ if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
{
ret = -ENOMEM;
put_domain(d);
int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
{
int rc = 0;
- full_execution_context_t *c = NULL;
+ struct vcpu_guest_context *c = NULL;
unsigned long vcpu = setdomaininfo->exec_domain;
struct exec_domain *ed;
!test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
return -EINVAL;
- if ( (c = xmalloc(full_execution_context_t)) == NULL )
+ if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
return -ENOMEM;
if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
* than domain 0. ie. the domains that are being built by the userspace dom0
* domain builder.
*/
-long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
+long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
{
struct domain *d = current->domain;
struct exec_domain *ed;
int rc = 0;
- full_execution_context_t *c;
+ struct vcpu_guest_context *c;
if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
return -EINVAL;
if ( alloc_exec_domain_struct(d, vcpu) == NULL )
return -ENOMEM;
- if ( (c = xmalloc(full_execution_context_t)) == NULL )
+ if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
{
rc = -ENOMEM;
goto out;
(*h)(key);
}
-void handle_keypress(unsigned char key, struct xen_regs *regs)
+void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
{
irq_keyhandler_t *h;
key_table[i].desc);
}
-static void dump_registers(unsigned char key, struct xen_regs *regs)
+static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
printk("'%c' pressed -> dumping registers\n", key);
show_registers(regs);
}
-static void halt_machine(unsigned char key, struct xen_regs *regs)
+static void halt_machine(unsigned char key, struct cpu_user_regs *regs)
{
printk("'%c' pressed -> rebooting machine\n", key);
machine_restart(NULL);
printk("Notifying guest... %d/%d\n", d->id, ed->eid);
printk("port %d/%d stat %d %d %d\n",
VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel));
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
+ &d->shared_info->evtchn_pending[0]),
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
+ &d->shared_info->evtchn_mask[0]),
+ test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5,
+ &ed->vcpu_info->evtchn_pending_sel));
send_guest_virq(ed, VIRQ_DEBUG);
}
}
extern void perfc_reset(unsigned char key);
#endif
-void do_debug_key(unsigned char key, struct xen_regs *regs)
+void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
{
(void)debugger_trap_fatal(0xf001, regs);
nop(); /* Prevent the compiler doing tail call
}
}
-static void __serial_rx(unsigned char c, struct xen_regs *regs)
+static void __serial_rx(unsigned char c, struct cpu_user_regs *regs)
{
if ( xen_rx )
{
}
}
-static void serial_rx(unsigned char c, struct xen_regs *regs)
+static void serial_rx(unsigned char c, struct cpu_user_regs *regs)
{
static int switch_code_count = 0;
* PRIVATE FUNCTIONS
*/
-static void uart_rx(struct uart *uart, struct xen_regs *regs)
+static void uart_rx(struct uart *uart, struct cpu_user_regs *regs)
{
unsigned char c;
}
static void serial_interrupt(
- int irq, void *dev_id, struct xen_regs *regs)
+ int irq, void *dev_id, struct cpu_user_regs *regs)
{
uart_rx((struct uart *)dev_id, regs);
}
/* The main trap handlers use these helper macros which include early bail. */
static inline int debugger_trap_entry(
- unsigned int vector, struct xen_regs *regs)
+ unsigned int vector, struct cpu_user_regs *regs)
{
return 0;
}
static inline int debugger_trap_fatal(
- unsigned int vector, struct xen_regs *regs)
+ unsigned int vector, struct cpu_user_regs *regs)
{
return 0;
}
extern void arch_do_createdomain(struct exec_domain *);
extern int arch_final_setup_guestos(
- struct exec_domain *, full_execution_context_t *);
+ struct exec_domain *, struct vcpu_guest_context *);
extern void domain_relinquish_resources(struct domain *);
#include <asm/ptrace.h>
-#define xen_regs pt_regs
+#define cpu_user_regs pt_regs
extern void init_bsp_APIC (void);
extern void setup_local_APIC (void);
extern void init_apic_mappings (void);
-extern void smp_local_timer_interrupt (struct xen_regs * regs);
+extern void smp_local_timer_interrupt (struct cpu_user_regs * regs);
extern void setup_APIC_clocks (void);
extern void setup_apic_nmi_watchdog (void);
-extern void nmi_watchdog_tick (struct xen_regs * regs);
+extern void nmi_watchdog_tick (struct cpu_user_regs * regs);
extern void touch_nmi_watchdog(void);
extern int APIC_init_uniprocessor (void);
extern void disable_APIC_timer(void);
#define DEBUGGER_trap_fatal(_v, _r) \
if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed;
-int call_with_registers(int (*f)(struct xen_regs *r));
+int call_with_registers(int (*f)(struct cpu_user_regs *r));
#if defined(CRASH_DEBUG)
-extern int __trap_to_cdb(struct xen_regs *r);
+extern int __trap_to_cdb(struct cpu_user_regs *r);
#define debugger_trap_entry(_v, _r) (0)
#define debugger_trap_fatal(_v, _r) __trap_to_cdb(_r)
#define debugger_trap_immediate() call_with_registers(__trap_to_cdb)
#include <xen/softirq.h>
static inline int debugger_trap_entry(
- unsigned int vector, struct xen_regs *regs)
+ unsigned int vector, struct cpu_user_regs *regs)
{
struct exec_domain *ed = current;
#elif 0
-extern int kdb_trap(int, int, struct xen_regs *);
+extern int kdb_trap(int, int, struct cpu_user_regs *);
static inline int debugger_trap_entry(
- unsigned int vector, struct xen_regs *regs)
+ unsigned int vector, struct cpu_user_regs *regs)
{
return 0;
}
static inline int debugger_trap_fatal(
- unsigned int vector, struct xen_regs *regs)
+ unsigned int vector, struct cpu_user_regs *regs)
{
return kdb_trap(vector, 0, regs);
}
struct i387_state i387;
/* general user-visible register state */
- execution_context_t user_ctxt;
+ struct cpu_user_regs user_regs;
void (*schedule_tail) (struct exec_domain *);
/*
* Generic CPUID function
*/
-static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+static inline void cpuid(
+ int op, unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
{
__asm__("cpuid"
: "=a" (*eax),
#endif
-extern int gpf_emulate_4gb(struct xen_regs *regs);
+extern int gpf_emulate_4gb(struct cpu_user_regs *regs);
extern void write_ptbase(struct exec_domain *ed);
void show_guest_stack();
void show_trace(unsigned long *esp);
void show_stack(unsigned long *esp);
-void show_registers(struct xen_regs *regs);
+void show_registers(struct cpu_user_regs *regs);
void show_page_walk(unsigned long addr);
-asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs);
+asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
#endif /* !__ASSEMBLY__ */
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
-extern int shadow_fault(unsigned long va, struct xen_regs *regs);
+extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
extern void shadow_invlpg(struct exec_domain *, unsigned long);
extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
-extern void vmx_asm_vmexit_handler(struct xen_regs);
+extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
extern void vmx_intr_assist(struct exec_domain *d);
struct mi_per_cpu_info
{
unsigned long mmio_target;
- struct xen_regs *inst_decoder_regs;
+ struct cpu_user_regs *inst_decoder_regs;
};
struct virutal_platform_def {
};
extern void handle_mmio(unsigned long, unsigned long);
-extern int vmx_setup_platform(struct exec_domain *, execution_context_t *);
+extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
// XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
int load_vmcs(struct arch_vmx_struct *, u64);
int store_vmcs(struct arch_vmx_struct *, u64);
void dump_vmcs(void);
-int construct_vmcs(struct arch_vmx_struct *, execution_context_t *,
- full_execution_context_t *, int);
+int construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *,
+ struct vcpu_guest_context *, int);
#define VMCS_USE_HOST_ENV 1
#define VMCS_USE_SEPARATE_ENV 0
"pushl %edx;" \
"pushl %ecx;" \
"pushl %ebx;" \
- "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \
+ "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \
"jz 2f;" \
"call setup_vm86_frame;" \
"jmp 3f;" \
- "2:testb $3,"STR(XREGS_cs)"(%esp);" \
+ "2:testb $3,"STR(UREGS_cs)"(%esp);" \
"jz 1f;" \
- "movl %ds,"STR(XREGS_ds)"(%esp);" \
- "movl %es,"STR(XREGS_es)"(%esp);" \
- "movl %fs,"STR(XREGS_fs)"(%esp);" \
- "movl %gs,"STR(XREGS_gs)"(%esp);" \
+ "movl %ds,"STR(UREGS_ds)"(%esp);" \
+ "movl %es,"STR(UREGS_es)"(%esp);" \
+ "movl %fs,"STR(UREGS_fs)"(%esp);" \
+ "movl %gs,"STR(UREGS_gs)"(%esp);" \
"3:"
#define SAVE_ALL_NOSEGREGS(_reg) \
pushl %edx; \
pushl %ecx; \
pushl %ebx; \
- testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \
+ testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \
jz 2f; \
call setup_vm86_frame; \
jmp 3f; \
- 2:testb $3,XREGS_cs(%esp); \
+ 2:testb $3,UREGS_cs(%esp); \
jz 1f; \
- movl %ds,XREGS_ds(%esp); \
- movl %es,XREGS_es(%esp); \
- movl %fs,XREGS_fs(%esp); \
- movl %gs,XREGS_gs(%esp); \
+ movl %ds,UREGS_ds(%esp); \
+ movl %es,UREGS_es(%esp); \
+ movl %fs,UREGS_fs(%esp); \
+ movl %gs,UREGS_gs(%esp); \
3:
#define SAVE_ALL_NOSEGREGS(_reg) \
#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
-asmlinkage void x(struct xen_regs * regs); \
+asmlinkage void x(struct cpu_user_regs * regs); \
__asm__( \
"\n"__ALIGN_STR"\n" \
SYMBOL_NAME_STR(x) ":\n\t" \
struct domain;
#define STACK_RESERVED \
- (sizeof(execution_context_t) + sizeof(struct domain *))
+ (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
static inline struct exec_domain *get_current(void)
{
: : "r" (STACK_SIZE-4), "r" (ed) );
}
-static inline execution_context_t *get_execution_context(void)
+static inline struct cpu_user_regs *get_cpu_user_regs(void)
{
- execution_context_t *execution_context;
+ struct cpu_user_regs *cpu_user_regs;
__asm__ ( "andl %%esp,%0; addl %2,%0"
- : "=r" (execution_context)
+ : "=r" (cpu_user_regs)
: "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
- return execution_context;
+ return cpu_user_regs;
}
/*
#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
"movl %0,%%esp; jmp "STR(__fn) \
- : : "r" (get_execution_context()) )
+ : : "r" (get_cpu_user_regs()) )
#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3)))
/* Number of bytes of on-stack execution state to be context-switched. */
-#define CTXT_SWITCH_STACK_BYTES (sizeof(execution_context_t))
+#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
#endif
#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
-asmlinkage void x(struct xen_regs * regs); \
+asmlinkage void x(struct cpu_user_regs * regs); \
__asm__( \
"\n"__ALIGN_STR"\n" \
SYMBOL_NAME_STR(x) ":\n\t" \
struct domain;
#define STACK_RESERVED \
- (sizeof(execution_context_t) + sizeof(struct domain *))
+ (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
static inline struct exec_domain *get_current(void)
{
: : "r" (STACK_SIZE-8), "r" (ed) );
}
-static inline execution_context_t *get_execution_context(void)
+static inline struct cpu_user_regs *get_cpu_user_regs(void)
{
- execution_context_t *execution_context;
+ struct cpu_user_regs *cpu_user_regs;
__asm__( "andq %%rsp,%0; addq %2,%0"
- : "=r" (execution_context)
+ : "=r" (cpu_user_regs)
: "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
- return execution_context;
+ return cpu_user_regs;
}
/*
#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
"movq %0,%%rsp; jmp "STR(__fn) \
- : : "r" (get_execution_context()) )
+ : : "r" (get_cpu_user_regs()) )
#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
/* Number of bytes of on-stack execution state to be context-switched. */
/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
-#define CTXT_SWITCH_STACK_BYTES (offsetof(execution_context_t, es))
+#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
#endif
unsigned long val,
unsigned int bytes);
-struct xen_regs;
+struct cpu_user_regs;
/*
* x86_emulate_memop: Emulate an instruction that faulted attempting to
*/
extern int
x86_emulate_memop(
- struct xen_regs *regs,
+ struct cpu_user_regs *regs,
unsigned long cr2,
struct x86_mem_emulator *ops,
int mode);
*/
extern void *
decode_register(
- u8 modrm_reg, struct xen_regs *regs, int highbyte_regs);
+ u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);
#endif /* __X86_EMULATE_H__ */
typedef struct
{
-} PACKED execution_context_t;
+} PACKED cpu_user_regs_t;
/*
* NB. This may become a 64-bit count with no shift. If this happens then the
* The following is all CPU context. Note that the i387_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*/
-typedef struct {
+typedef struct vcpu_guest_context {
//unsigned long flags;
-} PACKED full_execution_context_t;
+} PACKED vcpu_guest_context_t;
#endif /* !__ASSEMBLY__ */
memory_t address; /* 4: code address */
} PACKED trap_info_t; /* 8 bytes */
-typedef struct xen_regs
-{
+typedef struct cpu_user_regs {
u32 ebx;
u32 ecx;
u32 edx;
u32 ds;
u32 fs;
u32 gs;
-} PACKED execution_context_t;
+} cpu_user_regs_t;
typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
* The following is all CPU context. Note that the i387_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*/
-typedef struct {
+typedef struct vcpu_guest_context {
#define ECF_I387_VALID (1<<0)
#define ECF_VMX_GUEST (1<<1)
-#define ECF_IN_KERNEL (1<<2)
+#define ECF_IN_KERNEL (1<<2)
unsigned long flags;
- execution_context_t cpu_ctxt; /* User-level CPU registers */
+ cpu_user_regs_t user_regs; /* User-level CPU registers */
char fpu_ctxt[256]; /* User-level FPU registers */
trap_info_t trap_ctxt[256]; /* Virtual IDT */
unsigned int fast_trap_idx; /* "Fast trap" vector offset */
unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
unsigned long failsafe_callback_eip;
unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
-} PACKED full_execution_context_t;
+} PACKED vcpu_guest_context_t;
typedef struct {
/* MFN of a table of MFNs that make up p2m table */
memory_t address; /* 8: code address */
} PACKED trap_info_t; /* 16 bytes */
-typedef struct xen_regs
-{
+typedef struct cpu_user_regs {
u64 r15;
u64 r14;
u64 r13;
u64 fs_base;
u64 gs_base_kernel;
u64 gs_base_user;
-} PACKED execution_context_t;
+} cpu_user_regs_t;
typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
* The following is all CPU context. Note that the i387_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*/
-typedef struct {
+typedef struct vcpu_guest_context {
#define ECF_I387_VALID (1<<0)
#define ECF_VMX_GUEST (1<<1)
-#define ECF_IN_KERNEL (1<<2)
+#define ECF_IN_KERNEL (1<<2)
unsigned long flags;
- execution_context_t cpu_ctxt; /* User-level CPU registers */
+ cpu_user_regs_t user_regs; /* User-level CPU registers */
char fpu_ctxt[512]; /* User-level FPU registers */
trap_info_t trap_ctxt[256]; /* Virtual IDT */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long failsafe_callback_eip;
unsigned long syscall_callback_eip;
unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
-} PACKED full_execution_context_t;
+} PACKED vcpu_guest_context_t;
typedef struct {
/* MFN of a table of MFNs that make up p2m table */
#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
#define DOMFLAGS_SHUTDOWNSHIFT 16
u32 flags;
- full_execution_context_t *ctxt; /* NB. IN/OUT variable. */
+ vcpu_guest_context_t *ctxt; /* NB. IN/OUT variable. */
memory_t tot_pages;
memory_t max_pages;
memory_t shared_info_frame; /* MFN of shared_info struct */
domid_t domain;
u16 exec_domain;
/* IN/OUT parameters */
- full_execution_context_t *ctxt;
+ vcpu_guest_context_t *ctxt;
} dom0_setdomaininfo_t;
#define DOM0_MSR 15
extern void arch_do_boot_vcpu(struct exec_domain *ed);
extern int arch_set_info_guest(
- struct exec_domain *d, full_execution_context_t *c);
+ struct exec_domain *d, struct vcpu_guest_context *c);
extern void free_perdomain_pt(struct domain *d);
struct irqaction
{
- void (*handler)(int, void *, struct xen_regs *);
+ void (*handler)(int, void *, struct cpu_user_regs *);
const char *name;
void *dev_id;
};
extern void free_irq(unsigned int);
extern hw_irq_controller no_irq_type;
-extern void no_action(int cpl, void *dev_id, struct xen_regs *regs);
+extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
struct domain;
struct exec_domain;
* synchronously in hard-IRQ context with interrupts disabled. The @regs
* callback parameter points at the interrupted register context.
*/
-typedef void irq_keyhandler_t(unsigned char key, struct xen_regs *regs);
+typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs);
extern void register_irq_keyhandler(
unsigned char key, irq_keyhandler_t *handler, char *desc);
/* Inject a keypress into the key-handling subsystem. */
-extern void handle_keypress(unsigned char key, struct xen_regs *regs);
+extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs);
#endif /* __XEN_KEYHANDLER_H__ */
int parse_serial_handle(char *conf);
/* Register a character-receive hook on the specified COM port. */
-typedef void (*serial_rx_fn)(unsigned char, struct xen_regs *);
+typedef void (*serial_rx_fn)(unsigned char, struct cpu_user_regs *);
void serial_set_rx_handler(int handle, serial_rx_fn fn);
/* Transmit a single character via the specified COM port. */